Filtering and Hybrid Images

Om Shri Prasath EE17B113

Importing the required libraries

  • Numpy - For matrix manipulations (mainly used in Non-Max Suppression and calculating magnitude and angle)
  • Scipy
    • Stats : Used for generating the Gaussian Kernel
    • Signal : Used to check best method for convolution in 2d case
    • Ndimage : Used for image rotation in Q3 Part 6
  • Matplotlib - For displaying image
  • OpenCV - For reading images from source
In [1]:
# Importing the required libraries

import numpy as np  # array manipulation (signals, kernels, FFT helpers)
import scipy.stats as st  # st.norm.pdf used to build the Gaussian kernel
import scipy.signal as sg  # sg.choose_conv_method: pick fastest 2-D convolution strategy
import matplotlib.pyplot as plt  # all plotting/display
import scipy.ndimage as nd  # nd.rotate used to pre-rotate the ukulele image
import cv2  # cv2.resize for image resizing

# To remove unnecessary warnings
import warnings

warnings.filterwarnings("ignore")

# Note : Used for autoformatting the notebook, remove if causing any error
# (IPython magic — only valid inside a Jupyter/IPython session)
%load_ext nb_black
In [2]:
# Parameters for good matplotlib output
# NOTE: text.usetex=True requires a working LaTeX installation on the machine.

plt.rcParams["figure.autolayout"] = False
plt.rcParams["figure.figsize"] = 9, 6
plt.rcParams["axes.labelsize"] = 18
plt.rcParams["axes.titlesize"] = 20
plt.rcParams["font.size"] = 16
plt.rcParams["lines.linewidth"] = 2.0
plt.rcParams["lines.markersize"] = 6
plt.rcParams["legend.fontsize"] = 18
plt.rcParams["legend.numpoints"] = 2
plt.rcParams["legend.loc"] = "best"
plt.rcParams["legend.fancybox"] = True
plt.rcParams["legend.shadow"] = True
plt.rcParams["text.usetex"] = True
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.serif"] = "cm"
# matplotlib >= 3.3 requires text.latex.preamble to be a single string;
# the old list-of-lines form was deprecated in 3.3 and removed later.
plt.rcParams["text.latex.preamble"] = r"\usepackage{amsmath} \usepackage{amssymb}"

Filtering on 1-D Array

Creating the signal generator using numpy

In [3]:
# Function to generate signal X given range k
def x(k):
    """Generate the signal x_k = 3 + sin(2*pi*k/15) on the support 0 <= k <= 15.

    Parameters
    ----------
    k : np.ndarray
        Integer (or float) sample positions.

    Returns
    -------
    np.ndarray
        Same shape as `k`; zero everywhere outside the support [0, 15].
    """
    out = np.zeros(k.shape)
    # Compute the support mask once instead of re-evaluating it three times.
    support = (k > -1) & (k < 16)
    out[support] = 3 + np.sin(2 * k[support] * np.pi / 15)
    return out

Plotting the signal

In [4]:
# Creating the signal range
# NOTE: this `k` (0..15) is reused by every later filter/plot cell.
k = np.arange(0, 16)

# Generating the signal and plotting it

plt.title(r"Input Signal $x_k$")
plt.plot(k, x(k), "bo")  # "bo" = blue circle markers, no connecting line
plt.xlabel("k")
plt.ylabel("x")
plt.show()

Creating filter 1 : $y_k = x_{k+1}-x_k$

In [5]:
def y_1(k):
    """Filter 1 — first difference: y_k = x_{k+1} - x_k."""
    ahead = x(k + 1)
    here = x(k)
    return ahead - here
In [6]:
plt.title(r"Filtered Signal $y_k = x_{k+1}-x_k$")
plt.plot(k, y_1(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

Creating filter 2 : $y_k = x_{k}-\bar{X}$ where $\bar{X} = \dfrac{1}{L+1}\sum\limits_{i=0}^{L}x_i$

In [7]:
def y_2(k):
    """Filter 2 — mean removal: y_k = x_k - mean(x)."""
    signal = x(k)
    return signal - signal.mean()
In [8]:
plt.title(r"Filtered Signal $y_k = x_{k}-\bar{X}$")
plt.plot(k, y_2(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

Creating filter 3 : $y_k = median(\{x_l : l \in [k-2,k+2]\})$

In [9]:
def y_3(k):
    """Filter 3 — sliding median over the 5-sample window [k-2, k+2]."""
    out = []
    for centre in k:
        window = x(np.arange(centre - 2, centre + 3))
        out.append(np.median(window))
    return np.array(out)
In [10]:
plt.title(r"Filtered Signal $y_k = median(\{x_l : l \in [k-2,k+2]\})$")
plt.plot(k, y_3(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

Creating filter 4 : $y_k = x_{k+0.5}-x_{k-0.5}$

Using linear interpolation :

$x_{k+0.5} = x_{k}+\dfrac{(x_{k+1} - x_{k})}{2} = \dfrac{x_{k+1}+x_{k}}{2} \implies$ The values at the middle (0.5) are just the averages of the nearby points.

$\implies y_k = \dfrac{x_{k}+x_{k+1} - x_{k}-x_{k-1}}{2} = \dfrac{x_{k+1}-x_{k-1}}{2}$

In [11]:
def y_4(k):
    """Filter 4 — central difference: y_k = (x_{k+1} - x_{k-1}) / 2
    (linear interpolation of x_{k+0.5} - x_{k-0.5})."""
    return 0.5 * (x(k + 1) - x(k - 1))
In [12]:
plt.title(r"Filtered Signal $y_k = x_{k+0.5}-x_{k-0.5}$")
plt.plot(k, y_4(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

Creating filter 5 : $y_k = |x_{k+0.5}-x_{k-0.5}|$

(We use the same transform as above)

In [13]:
def y_5(k):
    """Filter 5 — magnitude of the central difference: |x_{k+1} - x_{k-1}| / 2."""
    diff = x(k + 1) - x(k - 1)
    return np.abs(diff) / 2
In [14]:
plt.title(r"Filtered Signal $y_k = |x_{k+0.5}-x_{k-0.5}|$")
plt.plot(k, y_5(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

Creating filter 6 : $y_k = \dfrac{1}{5}\sum\limits_{i=k-2}^{k+2} x_i$

In [15]:
def y_6(k):
    """Filter 6 — moving average over the 5-sample window [k-2, k+2]."""
    out = []
    for centre in k:
        window = x(np.arange(centre - 2, centre + 3))
        out.append(np.sum(window) / 5)
    return np.array(out)
In [16]:
plt.title(r"Filtered Signal $y_k = \frac{1}{5}\sum_{i=k-2}^{k+2} x_i$")

plt.plot(k, y_6(k), "bo")
plt.xlabel("k")
plt.ylabel("y")
plt.show()

The filters which are LTI are $y_1, y_4$ and $y_6$ only. Other filters do not satisfy the conditions to be LTI.

Defining function to do 1D convolution of two signals.

In [17]:
def convolve_1d(x, h):
    """Same-size linear convolution of two 1-D signals.

    The longer sequence is treated as the signal and the shorter as the
    kernel; the signal is zero-padded by half the kernel length on each
    side so the output has the same length as the (longer) input.
    """
    # Make sure `x` is the longer of the two sequences.
    if x.shape[0] < h.shape[0]:
        x, h = h, x

    half = len(h) // 2
    # Zero-pad so border samples have a full window to slide over.
    padded = np.concatenate([np.zeros(half), x, np.zeros(half)])

    # Convolution flips the kernel before the sliding dot product.
    flipped = h[::-1]
    width = len(flipped)

    out = []
    for start in range(len(x)):
        window = padded[start : start + width]
        out.append(np.dot(window, flipped))

    return np.array(out)

The convolutional filter for $y_1 \to h_1 = \begin{bmatrix}1 & -1 & 0\end{bmatrix}$

In [18]:
# Impulse response implementing filter y_1 (first difference): y_k = x_{k+1} - x_k
h_1 = np.array([1, -1, 0])

The convolutional filter for $y_4 \to h_4 = \begin{bmatrix}0.5 & 0 & -0.5\end{bmatrix}$

In [19]:
# Impulse response implementing filter y_4 (central difference): y_k = (x_{k+1} - x_{k-1}) / 2
h_4 = np.array([0.5, 0, -0.5])

The convolutional filter for $y_6 \to h_6 = \begin{bmatrix}0.2 & 0.2 & 0.2 & 0.2 & 0.2\end{bmatrix}$

In [20]:
# Impulse response implementing filter y_6 (length-5 moving average)
h_6 = np.array([0.2, 0.2, 0.2, 0.2, 0.2])

Applying the filter $h_1$ and visualizing the output and comparing with direct computation

In [21]:
# Side-by-side check: direct filter y_1 vs convolution with kernel h_1
y_1_c = convolve_1d(x(k), h_1)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+1}-x_k$")
axs[0].plot(k, y_1(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

axs[1].plot(k, y_1_c, "bo")
axs[1].set_title("Using Convolution")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Applying the filter $h_4$ and visualizing the output and comparing with direct computation

In [22]:
# Side-by-side check: direct filter y_4 vs convolution with kernel h_4
y_4_c = convolve_1d(x(k), h_4)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+0.5}-x_{k-0.5}$")
axs[0].plot(k, y_4(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

axs[1].plot(k, y_4_c, "bo")
axs[1].set_title("Using Convolution")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Applying the filter $h_6$ and visualizing the output and comparing with direct computation

In [23]:
# Side-by-side check: direct filter y_6 vs convolution with kernel h_6
y_6_c = convolve_1d(x(k), h_6)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = \frac{1}{5}\sum\limits_{i=k-2}^{k+2} x_i$")
axs[0].plot(k, y_6(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

axs[1].plot(k, y_6_c, "bo")
axs[1].set_title("Using Convolution")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Convolution using Fourier Transform

We will try to transform the convolution in space domain into multiplication in frequency domain and transform back to time domain to check the output.

Computing the convolution using multiplication in frequency domain for $h_1$ and visualizing the output

In [24]:
# Circular convolution of x with h_1 via pointwise product in the frequency domain
y_1_ft = np.fft.ifft(
    np.fft.fft(np.concatenate([h_1, np.zeros(len(k) - len(h_1))])) * np.fft.fft(x(k))
)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+1}-x_k$")
axs[0].plot(k, y_1(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform" -> "Transform"
axs[1].plot(k, y_1_ft, "bo")
axs[1].set_title("Using Fourier Transform")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Computing the convolution using multiplication in frequency domain for $h_4$ and visualizing the output

In [25]:
# Circular convolution of x with h_4 via pointwise product in the frequency domain
y_4_ft = np.fft.ifft(
    np.fft.fft(np.concatenate([h_4, np.zeros(len(k) - len(h_4))])) * np.fft.fft(x(k))
)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+0.5}-x_{k-0.5}$")
axs[0].plot(k, y_4(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform" -> "Transform"
axs[1].plot(k, y_4_ft, "bo")
axs[1].set_title("Using Fourier Transform")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Computing the convolution using multiplication in frequency domain for $h_6$ and visualizing the output

In [26]:
# Circular convolution of x with h_6 via pointwise product in the frequency domain
y_6_ft = np.fft.ifft(
    np.fft.fft(np.concatenate([h_6, np.zeros(len(k) - len(h_6))])) * np.fft.fft(x(k))
)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = \frac{1}{5}\sum\limits_{i=k-2}^{k+2} x_i$")
axs[0].plot(k, y_6(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform" -> "Transform"
axs[1].plot(k, y_6_ft, "bo")
axs[1].set_title("Using Fourier Transform")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

We see that none of the above Fourier-Transform outputs match the output of applying the filter directly. This is because multiplication in the frequency domain does not correspond to linear convolution in the space domain; it corresponds to circular convolution. To recover linear convolution from circular convolution, we first pad the input signal on both sides by half the filter length, then multiply the Fourier Transforms of the two signals, and finally discard the unwanted leading part of the output.

In [27]:
# Function to do linear convolution using fourier transform outputs
def fft_convolve_1d(x, h):
    """Linear convolution via the FFT.

    The signal is zero-padded by half the kernel length on each side so
    that the circular convolution implied by the frequency-domain product
    equals a linear convolution; the leading transient is then discarded.
    """
    half = len(h) // 2
    padded = np.concatenate([np.zeros(half), x, np.zeros(half)])
    kernel = np.concatenate([h, np.zeros(len(padded) - len(h))])

    circular = np.fft.ifft(np.fft.fft(kernel) * np.fft.fft(padded))

    # Drop the first 2*half samples introduced by the padding.
    return circular[2 * half :]

Applying the modified convolution using Fourier Transform and visualizing the output for $h_1$

In [28]:
# Linear convolution with h_1 via the padded-FFT helper
y_1_fft_con = fft_convolve_1d(x(k), h_1)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+1}-x_k$")
axs[0].plot(k, y_1(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform(Changed)" -> "Transform (Changed)"
axs[1].plot(k, y_1_fft_con, "bo")
axs[1].set_title("Using Fourier Transform (Changed)")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Applying the modified convolution using Fourier Transform and visualizing the output for $h_4$

In [29]:
# Linear convolution with h_4 via the padded-FFT helper
y_4_fft_con = fft_convolve_1d(x(k), h_4)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = x_{k+0.5}-x_{k-0.5}$")
axs[0].plot(k, y_4(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform(Changed)" -> "Transform (Changed)"
axs[1].plot(k, y_4_fft_con, "bo")
axs[1].set_title("Using Fourier Transform (Changed)")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Applying the modified convolution using Fourier Transform and visualizing the output for $h_6$

In [30]:
# Linear convolution with h_6 via the padded-FFT helper
y_6_fft_con = fft_convolve_1d(x(k), h_6)

fig, axs = plt.subplots(1, 2, figsize=(14, 6))
plt.suptitle(r"$y_k = \frac{1}{5}\sum\limits_{i=k-2}^{k+2} x_i$")
axs[0].plot(k, y_6(k), "bo")
axs[0].set_title("Original")
axs[0].set_xlabel("k")
axs[0].set_ylabel("y")

# Title typo fixed: "Transoform(Changed)" -> "Transform (Changed)"
axs[1].plot(k, y_6_fft_con, "bo")
axs[1].set_title("Using Fourier Transform (Changed)")
axs[1].set_xlabel("k")
axs[1].set_ylabel("y")

plt.show()

Hybrid Images

Importing the function from helpers.py

In [31]:
from helpers import load_image, save_image, vis_hybrid_image

Finding the best method to do 2D convolution for given question

In [32]:
# Ask scipy which convolution strategy is fastest for this image/kernel size;
# the returned 'fft' motivates the FFT-based convolve_2d defined below.
img1 = load_image("data/ex01/bicycle.bmp")
sg.choose_conv_method(img1, np.ones((3, 3)))
Out[32]:
'fft'

Since the output above is given as 'fft', we will use fft for convolution in 2D case

Function to do 2D convolution using FFT

In [33]:
def convolve_2d(img, fil):
    """2-D convolution of a single-channel image with a kernel, via FFT.

    The kernel is zero-padded to the image size, flipped (true convolution,
    CNN-style), multiplied in the frequency domain, and the result is rolled
    so it lines up with a direct spatial convolution.  The output is clipped
    to the valid intensity range [0, 1].
    """
    rows, cols = img.shape
    frows, fcols = fil.shape

    # Split the padding along axis 0 (rows) so the padded kernel matches the
    # image height exactly, even when the size difference is odd.
    top = (rows - frows) // 2
    bottom = top + 1 if (rows - frows) % 2 else top

    # Same split along axis 1 (columns).
    left = (cols - fcols) // 2
    right = left + 1 if (cols - fcols) % 2 else left

    # Zero-pad the kernel up to the image size.
    fil_padded = np.pad(fil, [(top, bottom), (left, right)])

    # Transform both to the Fourier domain; the kernel is flipped so the
    # product implements convolution rather than correlation.
    img_freq = np.fft.fft2(img)
    fil_freq = np.fft.fft2(np.flipud(np.fliplr(fil_padded)))

    m, n = img_freq.shape

    # Back to the spatial domain; keep only the real part.
    result = np.real(np.fft.ifft2(img_freq * fil_freq))

    # Undo the circular shift introduced by centring the padded kernel.
    result = np.roll(result, -m // 2 + 1, axis=0)
    result = np.roll(result, -n // 2 + 1, axis=1)

    # Clip to the valid intensity range [0, 1].
    return np.clip(result, 0, 1)

Function to convolve an image with a given filter

In [34]:
def filter_image(img, fil):
    """Apply a 2-D filter to a grayscale or RGB image.

    Colour images are filtered one channel at a time and restacked.
    Raises if either filter dimension is even (no well-defined centre).
    """
    # Filters must have odd dimensions so they have a centre pixel.
    if fil.shape[0] % 2 == 0 or fil.shape[1] % 2 == 0:
        raise Exception("Please enter correct filter")

    # Colour image: filter each of the three channels independently.
    if img.ndim != 2 and img.shape[2] == 3:
        channels = [convolve_2d(img[:, :, c], fil) for c in range(3)]
        return np.dstack(channels)

    # Grayscale image: filter directly.
    return convolve_2d(img, fil)

Function to create a gaussian kernel

In [35]:
def gaussian_kernel(sig):
    """Build a normalized 2-D Gaussian kernel for the given sigma.

    The kernel width is int(6*sig) rounded up to the next odd integer.
    The standard-normal PDF is sampled on [-2/sig, 2/sig] (so a larger
    sigma samples a narrower range, giving a flatter kernel), then the
    outer product of the 1-D profile with itself is normalized to sum 1.
    """
    size = int(6 * sig)
    # Force an odd kernel size so the kernel has a centre sample.
    kern = size if size % 2 == 1 else size + 1

    # 1-D sample positions and the standard-normal PDF evaluated on them.
    samples = np.linspace(-2 / sig, 2 / sig, kern)
    profile = st.norm.pdf(samples)

    # Separable construction: outer product of the 1-D profile with itself.
    kernel_2d = np.outer(profile, profile)

    # Normalize so the kernel sums to one (preserves image brightness).
    return kernel_2d / kernel_2d.sum()

We will define functions to create the hybrid filters :

  • low_pass : Gives the low-pass filtered output of image.
  • high_pass : Gives the high-pass filtered output of image.
  • hybrid_image : Combines the above two functions to create the hybrid image.
In [36]:
def low_pass(img, freq):
    """Gaussian-blur *img* with sigma *freq*; result rescaled/clipped to [0, 1]."""
    blurred = filter_image(img, gaussian_kernel(freq))

    # Normalize to the peak value, then clip into the displayable range.
    blurred = blurred / blurred.max()
    return np.clip(blurred, 0, 1)


def high_pass(img, freq):
    """Keep the detail of *img* (image minus its Gaussian blur).

    The detail is normalized and shifted by 0.5 so that zero detail maps
    to mid-gray for display, then clipped to [0, 1].
    """
    detail = img - filter_image(img, gaussian_kernel(freq))

    detail = detail / detail.max() + 0.5
    return np.clip(detail, 0, 1)


def hybrid_image(img1, img2, freq1, freq2):
    """Hybrid image: average of low-passed *img1* and high-passed *img2*."""
    lows = low_pass(img1, freq1)
    highs = high_pass(img2, freq2)

    return (lows + highs) / 2

Bicycle and Motorcycle

In [37]:
img1 = load_image("data/ex01/bicycle.bmp")
img2 = load_image("data/ex01/motorcycle.bmp")

plt.figure(figsize=(15, 10))
plt.title("Bicycle and Motorcycle")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
In [38]:
# Low-pass the bicycle (sigma 5), high-pass the motorcycle (sigma 3)
img_hyb = hybrid_image(img1, img2, 5, 3)

plt.figure(figsize=(15, 10))

plt.title("Highpass Motorcycle and Lowpass Bicycle")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [39]:
# Low-pass the motorcycle (sigma 3), high-pass the bicycle (sigma 1.5)
img_hyb = hybrid_image(img2, img1, 3, 1.5)

plt.figure(figsize=(15, 10))

plt.title("Highpass Bicycle and Lowpass Motorcycle")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()

Bird and Plane

In [40]:
# Load the bird/plane pair and show them side by side
img1 = load_image("data/ex02/bird.bmp")
img2 = load_image("data/ex02/plane.bmp")

plt.figure(figsize=(15, 10))
plt.title("Bird and Plane")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
In [41]:
# Low-pass the bird (sigma 4), high-pass the plane (sigma 2)
img_hyb = hybrid_image(img1, img2, 4, 2)

plt.figure(figsize=(15, 10))
plt.title("Highpass Plane and Lowpass Bird")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [42]:
# Low-pass the plane (sigma 5), high-pass the bird (sigma 1)
img_hyb = hybrid_image(img2, img1, 5, 1)

plt.figure(figsize=(15, 10))
plt.title("Highpass Bird and Lowpass Plane")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()

Cat and Dog

In [43]:
# Load the cat/dog pair and show them side by side
img1 = load_image("data/ex03/cat.bmp")
img2 = load_image("data/ex03/dog.bmp")

plt.figure(figsize=(15, 10))
plt.title("Cat and Dog")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
In [44]:
# Low-pass the cat (sigma 3), high-pass the dog (sigma 4)
img_hyb = hybrid_image(img1, img2, 3, 4)

plt.figure(figsize=(15, 10))
plt.title("Highpass Dog and Lowpass Cat")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [45]:
# Low-pass the dog (sigma 1.5), high-pass the cat (sigma 2)
img_hyb = hybrid_image(img2, img1, 1.5, 2)

plt.figure(figsize=(15, 10))
plt.title("Highpass Cat and Lowpass Dog")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()

Einstein and Marilyn

In [46]:
# Load the einstein/marilyn pair and show them side by side
img1 = load_image("data/ex04/einstein.bmp")
img2 = load_image("data/ex04/marilyn.bmp")

plt.figure(figsize=(15, 10))
plt.title("Einstein and Marilyn")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
In [47]:
# Low-pass einstein (sigma 1), high-pass marilyn (sigma 1)
img_hyb = hybrid_image(img1, img2, 1, 1)

plt.figure(figsize=(15, 10))
plt.title("Highpass Marilyn and Lowpass Einstein")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [48]:
# Low-pass marilyn (sigma 2), high-pass einstein (sigma 1.5)
img_hyb = hybrid_image(img2, img1, 2, 1.5)

plt.figure(figsize=(15, 10))
plt.title("Highpass Einstein and Lowpass Marilyn")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()

Fish and Submarine

In [49]:
# Load the fish/submarine pair and show them side by side.
img1 = load_image("data/ex05/fish.bmp")
img2 = load_image("data/ex05/submarine.bmp")

# Only one figure is needed; the duplicated plt.figure call used to create
# an empty extra figure (seen as "<Figure ... with 0 Axes>" in the output).
plt.figure(figsize=(15, 10))
plt.title("Fish and Submarine")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
<Figure size 1080x720 with 0 Axes>
In [50]:
# Low-pass the fish (sigma 3), high-pass the submarine (sigma 2)
img_hyb = hybrid_image(img1, img2, 3, 2)

plt.figure(figsize=(15, 10))
plt.title("Highpass Submarine and Lowpass Fish")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [51]:
# Low-pass the submarine (sigma 2), high-pass the fish (sigma 1)
img_hyb = hybrid_image(img2, img1, 2, 1)

plt.figure(figsize=(15, 10))
plt.title("Highpass Fish and Lowpass Submarine")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()

Durian and Ukulele

In [52]:
img1 = load_image("data/ex06/durian.jpg")
img2 = load_image("data/ex06/ukulele.jpg")

# Preprocessing the ukulele to create hybrid image

# Rotating the image, cropping the rotation borders and resizing to the
# durian's shape (cv2.resize takes (width, height))
img2 = cv2.resize(
    nd.rotate(img2, -120, mode="constant", cval=1)[25:-60, :-25, :],
    (img1.shape[1], img1.shape[0]),
)

# Clipping the outputs to the valid intensity range
img2[img2 < 0] = 0
img2[img2 > 1] = 1

# Removed a stray plt.imshow(img2.astype(np.uint8)) here: it was immediately
# overdrawn by the imshow below, and casting a [0, 1] float image to uint8
# would have rendered near-black anyway.
plt.figure(figsize=(15, 10))
plt.title("Durian and Ukulele")
plt.imshow(np.hstack([img1, img2]))
plt.axis("off")
plt.show()
In [53]:
# Low-pass the durian (sigma 13), high-pass the ukulele (sigma 3)
img_hyb = hybrid_image(img1, img2, 13, 3)

plt.figure(figsize=(15, 10))
plt.title("Highpass Ukulele and Lowpass Durian")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [54]:
# Low-pass the ukulele (sigma 20), high-pass the durian (sigma 3)
img_hyb = hybrid_image(img2, img1, 20, 3)

plt.figure(figsize=(15, 7))
plt.title("Highpass Durian and Lowpass Ukulele")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [55]:
# Load and crop the second einstein/marilyn pair
img1 = load_image("data/ex07/einstein.jpg")[10:-40, 25:-25, :]
img2 = load_image("data/ex07/marilyn.jpg")[:-35, :, :]

plt.figure(figsize=(15, 10))

# Reshaping the image to match each other (cv2.resize takes (width, height))
img1 = cv2.resize(img1, (img2.shape[1], img2.shape[0]))
plt.title("Einstein and Monroe Pt. 2")
# NOTE(review): unlike the other display cells, plt.axis("off") is not applied here
plt.imshow(np.hstack([img1, img2]))
plt.show()
In [56]:
# Low-pass einstein (sigma 2), high-pass marilyn (sigma 1)
img_hyb = hybrid_image(img1, img2, 2, 1)

plt.figure(figsize=(15, 10))
plt.title("Highpass Marilyn and Lowpass Einstein")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()
In [57]:
# Low-pass marilyn (sigma 2), high-pass einstein (sigma 1.5)
img_hyb = hybrid_image(img2, img1, 2, 1.5)

plt.figure(figsize=(15, 10))
plt.title("Highpass Einstein and Lowpass Marilyn")
plt.imshow(vis_hybrid_image(img_hyb))
plt.axis("off")
plt.show()